[IA64][HVM] Add buffer IO mechanism for Xen/VTi domain. Current
authorkfraser@localhost.localdomain <kfraser@localhost.localdomain>
Wed, 25 Oct 2006 14:29:08 +0000 (15:29 +0100)
committerkfraser@localhost.localdomain <kfraser@localhost.localdomain>
Wed, 25 Oct 2006 14:29:08 +0000 (15:29 +0100)
implementation can accelerate Windows guest's dense IO operations
at boot time.

Signed-off-by: Zhang xiantao <xiantao.zhang@intel.com>
tools/libxc/ia64/xc_ia64_hvm_build.c
tools/python/xen/xend/image.py
xen/arch/ia64/vmx/mmio.c
xen/arch/ia64/vmx/vmx_init.c
xen/include/asm-ia64/vmx_platform.h
xen/include/asm-ia64/vmx_vcpu.h
xen/include/public/arch-ia64.h

index 2c34b44a1d9d43b440e6e87dc1a3a8d2de00840e..0caaf343b35abd4aa536600c46a9c14e9a75a520 100644 (file)
@@ -551,8 +551,9 @@ setup_guest(int xc_handle, uint32_t dom, unsigned long memsize,
             char *image, unsigned long image_size, uint32_t vcpus,
             unsigned int store_evtchn, unsigned long *store_mfn)
 {
-    unsigned long page_array[2];
+    unsigned long page_array[3];
     shared_iopage_t *sp;
+    void *ioreq_buffer_page;
     unsigned long dom_memsize = (memsize << 20);
     DECLARE_DOMCTL;
 
@@ -587,7 +588,7 @@ setup_guest(int xc_handle, uint32_t dom, unsigned long memsize,
 
     /* Retrieve special pages like io, xenstore, etc. */
     if (xc_ia64_get_pfn_list(xc_handle, dom, page_array,
-                             IO_PAGE_START>>PAGE_SHIFT, 2) != 2) {
+                             IO_PAGE_START>>PAGE_SHIFT, 3) != 3) {
         PERROR("Could not get the page frame list");
         goto error_out;
     }
@@ -604,7 +605,10 @@ setup_guest(int xc_handle, uint32_t dom, unsigned long memsize,
 
     memset(sp, 0, PAGE_SIZE);
     munmap(sp, PAGE_SIZE);
-
+    ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom,
+                               PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[2]); 
+    memset(ioreq_buffer_page,0,PAGE_SIZE);
+    munmap(ioreq_buffer_page, PAGE_SIZE);
     return 0;
 
 error_out:
index b3ed4417d237a15f1d7bf35179d4d931e24d8bed..8a761d89cf0ddddd50e2a0cd851980cb9fc16cd2 100644 (file)
@@ -471,7 +471,7 @@ class IA64_HVM_ImageHandler(HVMImageHandler):
     def getRequiredAvailableMemory(self, mem_kb):
         page_kb = 16
         # ROM size for guest firmware, ioreq page and xenstore page
-        extra_pages = 1024 + 2
+        extra_pages = 1024 + 3
         return mem_kb + extra_pages * page_kb
 
     def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
index 34ad84666d86be83c3870efdf392ebb5d0fb697e..4e4b1248074ec85921989e5e299c254cc0cb6c18 100644 (file)
@@ -52,6 +52,70 @@ struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
 #define PIB_OFST_INTA           0x1E0000
 #define PIB_OFST_XTP            0x1E0008
 
+#define HVM_BUFFERED_IO_RANGE_NR 1
+
+struct hvm_buffered_io_range {
+    unsigned long start_addr;
+    unsigned long length;
+};
+
+static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
+static struct hvm_buffered_io_range
+*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
+{
+    &buffered_stdvga_range
+};
+
+int hvm_buffered_io_intercept(ioreq_t *p)
+{
+    struct vcpu *v = current;
+    spinlock_t  *buffered_io_lock;
+    buffered_iopage_t *buffered_iopage =
+        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
+    unsigned long tmp_write_pointer = 0;
+    int i;
+
+    /* Ignore READ ioreq_t: only writes can be buffered. */
+    if ( p->dir == IOREQ_READ )
+        return 0;
+
+    for ( i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++ ) {
+        if ( p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
+             p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
+                                     hvm_buffered_io_ranges[i]->length )
+            break;
+    }
+
+    if ( i == HVM_BUFFERED_IO_RANGE_NR )
+        return 0;
+
+    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
+    spin_lock(buffered_io_lock);
+
+    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
+         (unsigned long)IOREQ_BUFFER_SLOT_NUM ) {
+        /* The queue is full.
+         * Send the iopacket through the normal path instead.
+         * NOTE: The arithmetic operation above correctly handles
+         * write_pointer overflow.
+         */
+        spin_unlock(buffered_io_lock);
+        return 0;
+    }
+
+    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
+
+    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
+
+    /* Make the ioreq_t visible before advancing write_pointer. */
+    wmb();
+    buffered_iopage->write_pointer++;
+
+    spin_unlock(buffered_io_lock);
+
+    return 1;
+}
+
 static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
 
 static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
@@ -156,7 +220,11 @@ static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
     p->df = 0;
 
     p->io_count++;
-
+    if(hvm_buffered_io_intercept(p)){
+        p->state = STATE_IORESP_READY;
+        vmx_io_assist(v);
+        return ;
+    }else 
     vmx_send_assist_req(v);
     if(dir==IOREQ_READ){ //read
         *val=p->u.data;
index df1bc94de0706a3cd8c013741b63e55620876f92..9dae522050eddfe8a03d8274a23fb2856f178860 100644 (file)
@@ -362,8 +362,8 @@ static const io_range_t io_ranges[] = {
        {PIB_START, PIB_SIZE, GPFN_PIB},
 };
 
-/* Reseve 1 page for shared I/O and 1 page for xenstore.  */
-#define VMX_SYS_PAGES  (2 + (GFW_SIZE >> PAGE_SHIFT))
+/* Reserve 1 page for shared I/O, 1 page for xenstore and 1 page for buffered I/O. */
+#define VMX_SYS_PAGES  (3 + (GFW_SIZE >> PAGE_SHIFT))
 #define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
 
 static void vmx_build_physmap_table(struct domain *d)
@@ -424,8 +424,12 @@ static void vmx_build_physmap_table(struct domain *d)
        mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
        assign_domain_page(d, STORE_PAGE_START, mfn << PAGE_SHIFT);
        list_ent = mfn_to_page(mfn)->list.next;
+       ASSERT(list_ent != &d->page_list);
+    
+    mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
+    assign_domain_page(d, BUFFER_IO_PAGE_START, mfn << PAGE_SHIFT);
+    list_ent = mfn_to_page(mfn)->list.next;
        ASSERT(list_ent == &d->page_list);
-
 }
 
 void vmx_setup_platform(struct domain *d)
@@ -436,6 +440,10 @@ void vmx_setup_platform(struct domain *d)
 
        d->arch.vmx_platform.shared_page_va =
                (unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
+    /* For buffered IO requests. */
+    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
+    d->arch.hvm_domain.buffered_io_va =
+        (unsigned long)__va(__gpa_to_mpa(d, BUFFER_IO_PAGE_START));
        /* TEMP */
        d->arch.vmx_platform.pib_base = 0xfee00000UL;
 
index 33c4003cf3df1395dedca38fc02776a21cb8afa2..07d05a68c660ecc51733b91fdc36a2a4ca1bec2a 100644 (file)
@@ -24,6 +24,8 @@
 #include <asm/hvm/vioapic.h>
 struct mmio_list;
 typedef struct virtual_platform_def {
+    unsigned long          buffered_io_va;
+    spinlock_t             buffered_io_lock;
     unsigned long       shared_page_va;
     unsigned long       pib_base;
     unsigned char       xtp;
index a82e31fb7f940f6b778124a37236e4401e7289e5..556aee3e011ee50609cb5a6dbb9d7df8636d94bc 100644 (file)
@@ -57,7 +57,6 @@ extern int check_indirect_reg_rsv_fields ( int type, int index, u64 value );
 extern u64 set_isr_ei_ni (VCPU *vcpu);
 extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);
 
-
 /* next all for VTI domain APIs definition */
 extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
 extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
index 98bda1d24177e4e2c2e24f36a5dd871d79982ea0..76e0796931a9251e4d87de46132bcf4554a2ad56 100644 (file)
@@ -80,6 +80,9 @@ typedef unsigned long xen_ulong_t;
 #define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
 #define STORE_PAGE_SIZE         PAGE_SIZE
 
+#define BUFFER_IO_PAGE_START (STORE_PAGE_START+PAGE_SIZE)
+#define BUFFER_IO_PAGE_SIZE PAGE_SIZE
+
 #define IO_SAPIC_START   0xfec00000UL
 #define IO_SAPIC_SIZE    0x100000